/*
 * queue_l1_entry_update - update an L1 (PTE-level) page-table entry.
 * @ptr: virtual address of the PTE to update.
 * @val: new raw entry value.
 *
 * Flushes any pending queued MMU updates first so that previously queued
 * operations are applied before this write, then stores the new value
 * directly into the PTE.
 * NOTE(review): the direct store assumes the page table is writable by
 * the guest (writable-pagetable mode) — confirm against the Xen config.
 */
void queue_l1_entry_update(pte_t *ptr, unsigned long val)
{
	/* Preserve ordering w.r.t. any updates still sitting in the queue. */
	_flush_page_update_queue();
	*(unsigned long *)ptr = val;
}
/*
 * queue_l2_entry_update - update an L2 (PMD-level) page-table entry.
 * @ptr: virtual address of the PMD entry to update.
 * @val: new raw entry value.
 *
 * Mirrors queue_l1_entry_update(): drain the pending update queue so
 * ordering is preserved, then write the entry directly.
 * NOTE(review): direct store assumes guest-writable page tables —
 * confirm against the Xen configuration in use.
 */
void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
{
	/* Preserve ordering w.r.t. any updates still sitting in the queue. */
	_flush_page_update_queue();
	*(unsigned long *)ptr = val;
}
void queue_pt_switch(unsigned long ptr)
/* queue and flush versions of the above */
/*
 * xen_l1_entry_update - immediate ("queue and flush") L1 entry update.
 * @ptr: virtual address of the PTE to update.
 * @val: new raw entry value.
 *
 * Historically this queued the update and flushed synchronously; with
 * direct page-table writes it degenerates to a plain store of the new
 * entry value.
 */
void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
	*(unsigned long *)ptr = val;
}
/*
 * xen_l2_entry_update - immediate ("queue and flush") L2 entry update.
 * @ptr: virtual address of the PMD entry to update.
 * @val: new raw entry value.
 *
 * Counterpart of xen_l1_entry_update() at the PMD level: with direct
 * page-table writes this is a plain store of the new entry value.
 */
void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
{
	*(unsigned long *)ptr = val;
}
void xen_pt_switch(unsigned long ptr)
{
if (pmd_none(*pmd)) {
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_page_readonly(page_table);
+ //make_page_readonly(page_table);
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
if (page_table != pte_offset_kernel(pmd, 0))
BUG();
* it. We clean up by write-enabling and then freeing the old page dir.
*/
memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
- make_page_readonly(new_pgd);
+ //make_page_readonly(new_pgd);
queue_pgd_pin(__pa(new_pgd));
load_cr3(new_pgd);
queue_pgd_unpin(__pa(old_pgd));
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (pte) {
clear_page(pte);
- make_page_readonly(pte);
+ //make_page_readonly(pte);
xen_flush_page_update_queue();
}
return pte;
set_page_count(page, 1);
clear_page(pte);
- make_page_readonly(pte);
+ //make_page_readonly(pte);
queue_pte_pin(__pa(pte));
flush_page_update_queue();
}
spin_unlock_irqrestore(&pgd_lock, flags);
memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
out:
- make_page_readonly(pgd);
+ //make_page_readonly(pgd);
queue_pgd_pin(__pa(pgd));
flush_page_update_queue();
}